Advanced Lane Finding Project

The goals / steps of this project are the following:

  • Compute the camera calibration matrix and distortion coefficients given a set of chessboard images.
  • Apply a distortion correction to raw images.
  • Use color transforms, gradients, etc., to create a thresholded binary image.
  • Apply a perspective transform to rectify binary image ("birds-eye view").
  • Detect lane pixels and fit to find the lane boundary.
  • Determine the curvature of the lane and vehicle position with respect to center.
  • Warp the detected lane boundaries back onto the original image.
  • Output visual display of the lane boundaries and numerical estimation of lane curvature and vehicle position.
In [1]:
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# from scipy.misc import imsave
import glob
import pickle

# use ipywidgets to change the threshold values interactively.
from ipywidgets import interact, interactive, fixed, interact_manual
import ipywidgets as widgets
from moviepy.editor import VideoFileClip
from IPython.display import HTML

%matplotlib inline

Calibrate the Camera

Find the calibration image corners

In [2]:
# Make a list of calibration images
# (glob order is filesystem-dependent; order does not matter for calibration)
images = glob.glob('./camera_cal/calibration*.jpg')
In [3]:
# prepare object points
nx = 9 # the number of inside corners in x
ny = 6 # the number of inside corners in y

# Arrays to store object points and image points from all the images
objpoints = [] # 3D point in real world space
imgpoints = [] # 2D point in image plane

img_corners = []  # calibration images with detected corners drawn on them

# Prepare object points, like (0, 0, 0), (1, 0, 0), (2, 0, 0), ... (8, 5, 0)
# The board is assumed flat, so z = 0 everywhere; units are chessboard squares.
objp = np.zeros((ny*nx, 3),np.float32)
objp[:,:2] = np.mgrid[0:nx,0:ny].T.reshape(-1,2) # x,y coordinates

Load the calibration images and try to find the corners.

In [4]:
# Load the images

# i counts every calibration image processed; j counts those where the
# full nx x ny corner grid was actually detected.
i = 0
j = 0

for fname in images:
    
    img = cv2.imread(fname)
        
    # Convert to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # Find the chessboard corners
    ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)
    i = i + 1
    
    # If found, draw corners
    if ret == True:
        # Draw and display the corners
        # (drawChessboardCorners draws on `img` in place; the same object
        # is appended to img_corners for plotting below)
        j = j + 1
        imgpoints.append(corners)
        objpoints.append(objp)
        img_corners.append(cv2.drawChessboardCorners(img, (nx, ny), corners, ret))  
    
print('Calibration Image Corners:', j, "images found the corners out of", i, "images.")
# Plot every successful detection in a two-column grid.
# NOTE(review): images were loaded as BGR (cv2.imread) but shown with
# plt.imshow, so red/blue appear swapped — harmless for corner inspection.
figs = j // 2
plt.figure(figsize=(15,40))
for i in range(j):
    plt.subplot(figs + 1, 2, i+1)
    plt.imshow(img_corners[i])

plt.show()
Calibration Image Corners: 17 images found the corners out of 20 images.

Undistort an image

In [5]:
# Test undistortion on an image
img = cv2.imread('./camera_cal/calibration1.jpg')  # OpenCV loads as BGR
img_size = (img.shape[1], img.shape[0])  # (width, height)

# Do camera calibration given object points and image points
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img_size, None, None)
dst = cv2.undistort(img, mtx, dist, None, mtx)

# Save the camera calibration result for later use (we won't worry about rvecs / tvecs)
dist_pickle = {}
dist_pickle["mtx"] = mtx
dist_pickle["dist"] = dist
pickle.dump(dist_pickle, open("calibration.p", "wb"))

# Visualize undistortion.
# Fix: convert BGR -> RGB before imshow, otherwise matplotlib renders the
# red and blue channels swapped (the conversion was commented out before).
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20,10))
f.subplots_adjust(hspace = .2, wspace=.05)
ax1.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
ax1.set_title('Original Image', fontsize=30)
ax2.imshow(cv2.cvtColor(dst, cv2.COLOR_BGR2RGB))
ax2.set_title('Undistorted Image', fontsize=30)
Out[5]:
<matplotlib.text.Text at 0x12b348978>
In [6]:
# Several utility functions

# undistort image using camera calibration matrix from above
def undistort(img):
    """Undistort `img` using the module-level calibration (mtx, dist)."""
    return cv2.undistort(img, mtx, dist, None, mtx)

# Undistort function
def cal_undistort(img, objpoints, imgpoints):
    """Calibrate the camera from (objpoints, imgpoints) and undistort `img`.

    Returns (undistorted_image, camera_matrix, distortion_coeffs).
    NOTE: runs a full cv2.calibrateCamera on every call, which is slow.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    image_size = gray.shape[::-1]  # (width, height)
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
        objpoints, imgpoints, image_size, None, None)
    undist = cv2.undistort(img, mtx, dist, None, mtx)
    return undist, mtx, dist

# Warping function
def corners_unwarp(img, nx, ny, mtx, dist, corners):
    """Perspective-warp a chessboard image so the board fills the frame.

    Maps the four outer detected corners onto a rectangle inset `offset`
    pixels from the image border.  `ny`, `mtx` and `dist` are currently
    unused but kept for interface compatibility with existing callers.
    Returns (warped_image, perspective_matrix).
    """
    offset = 100  # inset of the destination rectangle from the border

    # Grab the image shape as (width, height)
    img_size = (img.shape[1], img.shape[0])

    # Outer corners of the detected grid: top-left, top-right,
    # bottom-right, bottom-left.
    src = np.float32([corners[0], corners[nx - 1], corners[-1], corners[-nx]])
    dst = np.float32([
        [offset, offset],
        [img_size[0] - offset, offset],
        [img_size[0] - offset, img_size[1] - offset],
        [offset, img_size[1] - offset],
    ])

    # Perspective transform from src to dst, then warp.
    M = cv2.getPerspectiveTransform(src, dst)
    warped = cv2.warpPerspective(img, M, img_size)

    return warped, M

Now plot a good example of the undistorted checkerboard image

In [7]:
# Calibrate image
# Recalibrates from all object/image points and undistorts a sample
# checkerboard (cal_undistort reruns calibrateCamera, so this is slow).
img1 = cv2.imread('./camera_cal/calibration4.jpg')
print(img1.shape)
undist_ex, mtx1, dist1 = cal_undistort(img1, objpoints, imgpoints)

# Side-by-side comparison of the original and undistorted images.
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
f.tight_layout()
ax1.imshow(img1)
ax1.set_title('Original Image', fontsize=30)
ax2.imshow(undist_ex)
ax2.set_title('Undistorted Image', fontsize=30)
(720, 1280, 3)
Out[7]:
<matplotlib.text.Text at 0x12b49a160>

Undistort and warp images using perspective transformation.

In [8]:
img = cv2.imread('./camera_cal/calibration7.jpg')

# Convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# Find the chessboard corners
ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)
cv2.drawChessboardCorners(img, (nx, ny), corners, ret)

# Undistort the image
# NOTE(review): this reassigns the module-level `mtx` and `dist`, which
# later cells (and undistort()) use — confirm this overwrite is intended.
undistorted, mtx, dist = cal_undistort(img, objpoints, imgpoints)

# Warp image
# Uses the corners detected on the (still distorted) image as src points.
img_warped, M = corners_unwarp(undistorted, nx, ny, mtx, dist, corners)

# Now plot an example of the undistorted/warped image
# Calibrate image
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
f.tight_layout()
ax1.imshow(img)
ax1.set_title('Original Image', fontsize=30)
ax2.imshow(img_warped)
ax2.set_title('Undistorted Warped Image', fontsize=30)
Out[8]:
<matplotlib.text.Text at 0x12a692588>

Apply a distortion correction to raw images.

In [9]:
exampleImg = cv2.imread('./test_images/straight_lines1.jpg')
exampleImg = cv2.cvtColor(exampleImg, cv2.COLOR_BGR2RGB)  # BGR -> RGB for matplotlib

# Calibrate image
# Undistort with the module-level mtx/dist (set by the cell above).
exampleImg_undistort = cv2.undistort(exampleImg, mtx, dist, None, mtx)

# Visualize undistortion
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20,10))
f.subplots_adjust(hspace = .2, wspace=.05)
ax1.imshow(exampleImg)
ax1.set_title('Original Image', fontsize=30)
ax2.imshow(exampleImg_undistort)
ax2.set_title('Undistorted Image', fontsize=30)
Out[9]:
<matplotlib.text.Text at 0x12a792080>

Try another image with a curved lane

In [10]:
# Repeat the undistortion check on an image with a curved lane.
exampleImg = cv2.imread('./test_images/test3.jpg')
exampleImg = cv2.cvtColor(exampleImg, cv2.COLOR_BGR2RGB)  # BGR -> RGB for matplotlib

# Calibrate image
exampleImg_undistort = cv2.undistort(exampleImg, mtx, dist, None, mtx)

# Visualize undistortion
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20,10))
f.subplots_adjust(hspace = .2, wspace=.05)
ax1.imshow(exampleImg)
ax1.set_title('Original Image', fontsize=30)
ax2.imshow(exampleImg_undistort)
ax2.set_title('Undistorted Image', fontsize=30)
Out[10]:
<matplotlib.text.Text at 0x12a74be48>
In [11]:
def unwarp(img, src, dst):
    """Warp `img` with the perspective transform mapping src -> dst.

    Returns (warped, M, Minv), where Minv maps bird's-eye coordinates
    back to the original camera view.
    """
    height, width = img.shape[:2]
    # Forward and inverse perspective matrices.
    M = cv2.getPerspectiveTransform(src, dst)
    Minv = cv2.getPerspectiveTransform(dst, src)
    # Top-down view via bilinear interpolation.
    warped = cv2.warpPerspective(img, M, (width, height), flags=cv2.INTER_LINEAR)
    return warped, M, Minv

Apply unwarping to get a bird's-eye view of the lane

In [12]:
h,w = exampleImg_undistort.shape[:2]

# # define source and destination points for transform
# src = np.float32([(575,464),
#                   (707,464), 
#                   (258,682), 
#                   (1049,682)])
# dst = np.float32([(450,0),
#                   (w-450,0),
#                   (450,h),
#                   (w-450,h)])

# Four Source Points
# Order: top-left, bottom-left, top-right, bottom-right — a trapezoid
# around the lane in the undistorted image.
src = np.float32(
    [[580, 460],
     [200, 720],
     [706, 460],
     [1140, 720]])

# Four Destination Points
# The trapezoid is mapped onto this rectangle in the bird's-eye view.
dst = np.float32(
    [[200, 100],
     [200, 720],
     [1040, 100],
     [1040, 720]])
    
exampleImg_unwarp, M, Minv = unwarp(exampleImg_undistort, src, dst)

# Visualize unwarp
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20,10))
f.subplots_adjust(hspace = .2, wspace=.05)
ax1.imshow(exampleImg_undistort)
# Trace the source trapezoid over the undistorted image.
x = [src[0][0],src[2][0],src[3][0],src[1][0],src[0][0]]
y = [src[0][1],src[2][1],src[3][1],src[1][1],src[0][1]]
ax1.plot(x, y, color='#33cc99', alpha=0.4, linewidth=3, solid_capstyle='round', zorder=2)
ax1.set_ylim([h,0])
ax1.set_xlim([0,w])
ax1.set_title('Undistorted Image', fontsize=30)
ax2.imshow(exampleImg_unwarp)
ax2.set_title('Unwarped Image', fontsize=30)
Out[12]:
<matplotlib.text.Text at 0x12b525da0>

Visualize multiple colorspace channels

In [13]:
# Visualize multiple color space channels
# Compare RGB, HSV and LAB channels of the bird's-eye image to see which
# isolate the lane lines best (the HLS S-channel is thresholded later).
exampleImg_unwarp_R = exampleImg_unwarp[:,:,0]
exampleImg_unwarp_G = exampleImg_unwarp[:,:,1]
exampleImg_unwarp_B = exampleImg_unwarp[:,:,2]
exampleImg_unwarp_HSV = cv2.cvtColor(exampleImg_unwarp, cv2.COLOR_RGB2HSV)
exampleImg_unwarp_H = exampleImg_unwarp_HSV[:,:,0]
exampleImg_unwarp_S = exampleImg_unwarp_HSV[:,:,1]
exampleImg_unwarp_V = exampleImg_unwarp_HSV[:,:,2]
exampleImg_unwarp_LAB = cv2.cvtColor(exampleImg_unwarp, cv2.COLOR_RGB2Lab)
exampleImg_unwarp_L = exampleImg_unwarp_LAB[:,:,0]
exampleImg_unwarp_A = exampleImg_unwarp_LAB[:,:,1]
exampleImg_unwarp_B2 = exampleImg_unwarp_LAB[:,:,2]

# 3x3 grid: one grayscale panel per channel.
fig, axs = plt.subplots(3,3, figsize=(16, 12))
# fig.subplots_adjust(hspace = .2, wspace=.001)
axs = axs.ravel()
axs[0].imshow(exampleImg_unwarp_R, cmap='gray')
axs[0].set_title('RGB R-channel', fontsize=30)
axs[1].imshow(exampleImg_unwarp_G, cmap='gray')
axs[1].set_title('RGB G-Channel', fontsize=30)
axs[2].imshow(exampleImg_unwarp_B, cmap='gray')
axs[2].set_title('RGB B-channel', fontsize=30)
axs[3].imshow(exampleImg_unwarp_H, cmap='gray')
axs[3].set_title('HSV H-Channel', fontsize=30)
axs[4].imshow(exampleImg_unwarp_S, cmap='gray')
axs[4].set_title('HSV S-channel', fontsize=30)
axs[5].imshow(exampleImg_unwarp_V, cmap='gray')
axs[5].set_title('HSV V-Channel', fontsize=30)
axs[6].imshow(exampleImg_unwarp_L, cmap='gray')
axs[6].set_title('LAB L-channel', fontsize=30)
axs[7].imshow(exampleImg_unwarp_A, cmap='gray')
axs[7].set_title('LAB A-Channel', fontsize=30)
axs[8].imshow(exampleImg_unwarp_B2, cmap='gray')
axs[8].set_title('LAB B-Channel', fontsize=30)
Out[13]:
<matplotlib.text.Text at 0x127526b00>

Define several filters - sobel absolute, magnitude of gradient, sobel direction, etc.

We use ipywidgets and interact_manual functions to test multiple threshold values by setting them manually with the UI widgets.

In [14]:
# Define a function that applies Sobel x or y,
# then takes an absolute value and applies a threshold.
def abs_sobel_thresh(img, orient='x', sobel_kernel=3, thresh=(0, 255)):
    """Binary mask of pixels whose scaled |Sobel| gradient lies in `thresh`.

    Parameters
    ----------
    img : RGB image.
    orient : 'x' or 'y' — derivative direction.
    sobel_kernel : odd Sobel aperture size.
    thresh : inclusive (min, max) bounds on the 0-255 scaled gradient.

    Raises
    ------
    ValueError : if `orient` is neither 'x' nor 'y'.  (The original code
    hit a NameError on `abs_sobel` in that case.)
    """
    # 1) Convert to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)

    # 2/3) Take the derivative in the requested direction, absolute value.
    if orient == 'x':
        abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel))
    elif orient == 'y':
        abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel))
    else:
        raise ValueError("orient must be 'x' or 'y', got %r" % (orient,))

    # 4) Scale to 8-bit (0 - 255).  Guard against a flat image
    # (max == 0), which previously divided by zero and produced NaNs.
    max_val = np.max(abs_sobel)
    if max_val == 0:
        return np.zeros_like(gray, dtype=np.uint8)
    scaled_sobel = np.uint8(255*abs_sobel/max_val)

    # Inclusive (>=, <=) thresholding into a 0/1 mask.
    binary_output = np.zeros_like(scaled_sobel)
    binary_output[(scaled_sobel >= thresh[0]) & (scaled_sobel <= thresh[1])] = 1

    return binary_output
In [15]:
def update(min_thresh=20, max_thresh=120):
    # Interactive preview: re-run the absolute-Sobel threshold with the
    # slider values and show the result next to the unwarped image.
    exampleImg_sobelAbs = abs_sobel_thresh(exampleImg_unwarp, 'x', 3, thresh=(min_thresh, max_thresh))
    # Visualize sobel absolute threshold
    f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20,10))
    f.subplots_adjust(hspace = .2, wspace=.05)
    ax1.imshow(exampleImg_unwarp)
    ax1.set_title('Unwarped Image', fontsize=30)
    ax2.imshow(exampleImg_sobelAbs, cmap='gray')
    ax2.set_title('Sobel Absolute', fontsize=30)

# Manual-tuning widget only; the adopted values are hard-coded in the
# pipeline, so a plain Run-All stays reproducible.
# NOTE(review): `update` is redefined by later cells under the same name.
interact_manual(update, 
         min_thresh=(0, 255, 1), 
         max_thresh=(0, 255, 1))

# min_thread = 20, max_thread = 120
Out[15]:
<function __main__.update>
In [16]:
# Define a function to return the magnitude of the gradient
# for a given sobel kernel size and threshold values
def mag_thresh(img, sobel_kernel=3, mag_thresh=(0, 255)):
    """Binary mask where the scaled Sobel gradient magnitude is in range.

    NOTE: the `mag_thresh` parameter shadows the function's own name; it
    is kept for backward compatibility with existing keyword callers.
    """
    # Convert to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # Take both Sobel x and y gradients
    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    # Calculate the gradient magnitude
    gradmag = np.sqrt(sobelx**2 + sobely**2)
    # Rescale to 8 bit.  Guard the flat-image case (max == 0), which
    # previously divided by zero and produced NaNs.
    scale_factor = np.max(gradmag)/255
    if scale_factor == 0:
        scale_factor = 1
    gradmag = (gradmag/scale_factor).astype(np.uint8)
    # Create a binary image of ones where threshold is met, zeros otherwise
    binary_output = np.zeros_like(gradmag)
    binary_output[(gradmag >= mag_thresh[0]) & (gradmag <= mag_thresh[1])] = 1

    # Return the binary image
    return binary_output
In [17]:
def update(kernel_size=15, min_thresh=9, max_thresh=150):
    # Interactive preview of the gradient-magnitude threshold.
    exampleImg_sobelMag = mag_thresh(exampleImg_unwarp, kernel_size, (min_thresh, max_thresh))
    # Visualize sobel magnitude threshold
    f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20,10))
    f.subplots_adjust(hspace = .2, wspace=.05)
    ax1.imshow(exampleImg_unwarp)
    ax1.set_title('Unwarped Image', fontsize=30)
    ax2.imshow(exampleImg_sobelMag, cmap='gray')
    ax2.set_title('Sobel Magnitude', fontsize=30)

# Manual-tuning widget; the adopted values (below) are hard-coded in the
# pipeline, so re-running stays reproducible.
interact_manual(update, kernel_size=(1,31,2), 
                 min_thresh=(0,255), 
                 max_thresh=(0,255))

# kernel_size = 15, min_thread = 10, max_thread = 150
Out[17]:
<function __main__.update>
In [18]:
# Define a function to threshold an image for a given range and Sobel kernel
def dir_threshold(img, sobel_kernel=3, thresh=(0, np.pi/2)):
    """Binary mask of pixels whose gradient direction falls inside `thresh`.

    The direction is arctan(|dy| / |dx|), so values lie in [0, pi/2].
    """
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # x and y gradients
    grad_x = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    grad_y = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    # Absolute gradient direction, then inclusive in-range test.
    direction = np.arctan2(np.absolute(grad_y), np.absolute(grad_x))
    binary_output = np.zeros_like(direction)
    binary_output[(direction >= thresh[0]) & (direction <= thresh[1])] = 1
    return binary_output
In [19]:
def update(kernel_size=5, min_thresh=0.15, max_thresh=0.6):
    # Interactive preview of the gradient-direction threshold (radians).
    exampleImg_sobelDir = dir_threshold(exampleImg_unwarp, kernel_size, thresh=(min_thresh, max_thresh))
    # Visualize sobel direction threshold
    f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20,10))
    f.subplots_adjust(hspace = .2, wspace=.05)
    ax1.imshow(exampleImg_unwarp)
    ax1.set_title('Unwarped Image', fontsize=30)
    ax2.imshow(exampleImg_sobelDir, cmap='gray')
    ax2.set_title('Sobel Direction', fontsize=30)

# Manual-tuning widget; adopted values are hard-coded in the pipeline.
interact_manual(update, kernel_size=(1,31,2), 
                 min_thresh=(0,np.pi/2,0.01), 
                 max_thresh=(0,np.pi/2,0.01))

# kernel_size = 5, min_thread = 0.15, max_thread = 0.60
Out[19]:
<function __main__.update>
In [20]:
def filter_threshold(img_unwarp):
    """Combine the gradient filters into a single binary image.

    A pixel is set when the x-Sobel threshold fires, OR when both the
    gradient-magnitude and gradient-direction thresholds fire.
    """
    # Apply each of the thresholding functions with the tuned parameters.
    gradx = abs_sobel_thresh(img_unwarp, orient='x', sobel_kernel=3, thresh=(20, 120))
    mag_binary = mag_thresh(img_unwarp, sobel_kernel=15, mag_thresh=(10, 150))
    dir_binary = dir_threshold(img_unwarp, sobel_kernel=5, thresh=(0.16, 0.61))

    binary_warped = np.zeros_like(dir_binary)
    binary_warped[(gradx == 1) | ((mag_binary == 1) & (dir_binary == 1))] = 1
    return binary_warped
In [21]:
# Apply each of the thresholding functions
gradx = abs_sobel_thresh(exampleImg_unwarp, orient='x', sobel_kernel=3, thresh=(20, 120))
mag_binary = mag_thresh(exampleImg_unwarp, sobel_kernel=3, mag_thresh=(10, 150))
dir_binary = dir_threshold(exampleImg_unwarp, sobel_kernel=5, thresh=(0.16, 0.61))

# Combine: x-gradient OR (magnitude AND direction)
binary_warped = np.zeros_like(dir_binary)
binary_warped[((gradx == 1)) | ((mag_binary == 1) & (dir_binary == 1))] = 1

# Now plot an example of the directional gradient
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
f.tight_layout()
ax1.imshow(exampleImg_unwarp)
ax1.set_title('Undistorted Image', fontsize=30)
ax2.imshow(binary_warped, cmap='gray')
# Fix: corrected title typo 'Sobal' -> 'Sobel'
ax2.set_title('Sobel Threshold + Magnitude + Direction Image', fontsize=30)
plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
In [22]:
# Define a function that thresholds the S-channel of HLS
# Use exclusive lower bound (>) and inclusive upper (<=)
def hls_sthresh(img, thresh=(125, 255)):
    """Binary mask of the HLS S-channel in (thresh[0], thresh[1]]."""
    # Convert to HLS and pull out the saturation channel.
    hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
    s_channel = hls[:, :, 2]
    # Exclusive lower bound, inclusive upper bound.
    binary_output = np.zeros_like(s_channel)
    binary_output[(s_channel > thresh[0]) & (s_channel <= thresh[1])] = 1
    return binary_output
In [23]:
def update(min_thresh=170, max_thresh=220):
    # Interactive preview of the HLS S-channel threshold.
    exampleImg_SThresh = hls_sthresh(exampleImg_unwarp, (min_thresh, max_thresh))
    # Visualize hls s-channel threshold
    f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20,10))
    f.subplots_adjust(hspace = .2, wspace=.05)
    ax1.imshow(exampleImg_unwarp)
    ax1.set_title('Unwarped Image', fontsize=30)
    ax2.imshow(exampleImg_SThresh, cmap='gray')
    ax2.set_title('HLS S-Channel', fontsize=30)

# Manual-tuning widget; the adopted values (170, 220) are hard-coded in
# the pipeline, so a plain re-run stays reproducible.
interact_manual(update,
         min_thresh=(0,255), 
         max_thresh=(0,255))

# min_thresh = 170, max_thresh = 220
Out[23]:
<function __main__.update>

We found that the HLS S-channel is useful for finding the lane on bright sections of the road.

In [24]:
# Define the complete image processing pipeline.
# Given an image, undistort, unwarp, then apply a series of filters and
# combine them into a single binary image.
def pipeline(img):
    """Process one RGB frame.

    Returns (undistorted_image, binary_warped, Minv), where Minv warps
    bird's-eye coordinates back onto the camera view.
    """
    # Undistort, then perspective-transform to a bird's-eye view.
    img_undistort = undistort(img)
    img_unwarp, M, Minv = unwarp(img_undistort, src, dst)

    # Gradient-based filters (tuned interactively above).
    gradx = abs_sobel_thresh(img_unwarp, orient='x', sobel_kernel=3, thresh=(20, 120))
    mag_binary = mag_thresh(img_unwarp, sobel_kernel=15, mag_thresh=(10, 150))
    dir_binary = dir_threshold(img_unwarp, sobel_kernel=5, thresh=(0.16, 0.61))

    # HLS S-Channel — picks up lanes on bright road surfaces.
    hls_s_binary = hls_sthresh(img_unwarp, (170, 220))

    # Combine: x-gradient OR (magnitude AND direction) OR S-channel.
    binary_warped = np.zeros_like(dir_binary)
    binary_warped[(gradx == 1)
                  | ((mag_binary == 1) & (dir_binary == 1))
                  | (hls_s_binary == 1)] = 1

    return img_undistort, binary_warped, Minv

Apply the pipeline to the test images, and check the results.

In [25]:
# Make a list of example images
# NOTE(review): this rebinds `images` (previously the calibration list).
images = glob.glob('./test_images/*.jpg')
                                          
# Set up plot
# plt.figure(figsize=(10,20))
fig, axs = plt.subplots(len(images),2, figsize=(15, 30))
# fig.subplots_adjust(hspace = .2, wspace=.001)
axs = axs.ravel()
                  
# For each test image: left column = original, right column = pipeline's
# binary bird's-eye output.
i = 0
for image in images:
    img = cv2.imread(image)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img_undistort, img_bin, Minv = pipeline(img)
    print(image)
    axs[i].imshow(img)
    axs[i].axis('off')
    i += 1
    axs[i].imshow(img_bin, cmap='gray')
    axs[i].axis('off')
    i += 1
    
./test_images/test6.jpg
./test_images/test5.jpg
./test_images/test4.jpg
./test_images/test1.jpg
./test_images/test3.jpg
./test_images/test2.jpg
./test_images/straight_lines2.jpg
./test_images/straight_lines1.jpg

Select an image, apply the pipeline, get the histogram, and find the curvature of the lanes.

In [26]:
# Run the full pipeline on one curved-lane test image; its binary output
# feeds the histogram / sliding-window steps below.
img = cv2.imread('./test_images/test5.jpg')
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img_undistort, binary_warped, Minv = pipeline(img)
In [27]:
def hist_binary(img):
    """Column-wise pixel sum over the bottom half of a (binary) image."""
    bottom_half = img[img.shape[0] // 2:, :]
    return bottom_half.sum(axis=0)


# Histogram over the bottom half of the binary image; its two peaks seed
# the left/right sliding-window lane search.
bird_hist = hist_binary(binary_warped[:,:])
plt.plot(bird_hist)
Out[27]:
[<matplotlib.lines.Line2D at 0x13217b160>]
In [28]:
# Use histogram from step 4a
histogram = bird_hist  # np.sum(binary_warped[binary_warped.shape[0]/2:,:], axis=0)
detected = False  # whether the previous frame produced a valid lane fit
count = 0         # frame counter, indexes the fit-history arrays below
# Rolling history of polynomial fits, one row of 3 coefficients per frame.
# TODO(review): sliding_windows writes row `count` and averages the last
# 15 rows once count > 15, but only 6 rows are allocated here (the comment
# there mentions 20 frames) — confirm the intended history length.
store_left_fit = np.zeros((6,3))
store_right_fit = np.zeros((6,3))
# Fix: removed the module-level `global count, detected, ...` statement —
# `global` is a no-op at module scope; these names are already globals.
In [29]:
def sliding_windows(histogram, binary_warped, count):
    """Locate lane-line pixels with a sliding-window search and fit
    second-order polynomials to each lane.

    Parameters
    ----------
    histogram : column-sum histogram of the bottom half of the frame,
        used to seed the left/right starting x positions.
    binary_warped : bird's-eye thresholded binary image.
    count : frame index into the module-level store_left_fit /
        store_right_fit fit-history arrays.

    Returns (out_img, left_fit, right_fit, left_fitx, right_fitx, ploty,
    leftx, rightx, lefty, righty, detected, left_conf, right_conf).

    Fix: `np.int` (deprecated since NumPy 1.20 and later removed) is
    replaced with the builtin `int`; behavior is unchanged.
    """
    # Create an output image to draw on and visualize the result
    out_img = np.dstack((binary_warped, binary_warped, binary_warped))*255

    # Find the peak of the left and right halves of the histogram
    # These will be the starting point for the left and right lines
    # (the left search starts at x=150 to skip the image border).
    midpoint = int(histogram.shape[0]/2)
    leftx_base = np.argmax(histogram[150:midpoint]) + 150
    rightx_base = np.argmax(histogram[midpoint:]) + midpoint

    # Choose the number of sliding windows
    nwindows = 9
    # Set height of windows
    window_height = int(binary_warped.shape[0]/nwindows)
    # Identify the x and y positions of all nonzero pixels in the image
    nonzero = binary_warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])

    # Current positions to be updated for each window
    leftx_current = leftx_base
    rightx_current = rightx_base

    # Set the width of the windows +/- margin
    margin = 120

    # Set minimum number of pixels found to recenter window
    minpix = 60

    # Create empty lists to receive left and right lane pixel indices
    left_lane_inds = []
    right_lane_inds = []

    # Step through the windows one by one, bottom to top
    for window in range(nwindows):
        # Identify window boundaries in x and y (and right and left)
        win_y_low = binary_warped.shape[0] - (window+1)*window_height
        win_y_high = binary_warped.shape[0] - window*window_height
        win_xleft_low = leftx_current - margin
        win_xleft_high = leftx_current + margin
        win_xright_low = rightx_current - margin
        win_xright_high = rightx_current + margin

        # Draw the windows on the visualization image
        cv2.rectangle(out_img,(win_xleft_low,win_y_low),(win_xleft_high,win_y_high),(0,255,0), 2)
        cv2.rectangle(out_img,(win_xright_low,win_y_low),(win_xright_high,win_y_high),(0,255,0), 2)

        # Identify the nonzero pixels in x and y within the window
        good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
        good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]

        # Append these indices to the lists
        left_lane_inds.append(good_left_inds)
        right_lane_inds.append(good_right_inds)

        # If you found > minpix pixels, recenter next window on their mean position
        if len(good_left_inds) > minpix:
            leftx_current = int(np.mean(nonzerox[good_left_inds]))
        if len(good_right_inds) > minpix:
            rightx_current = int(np.mean(nonzerox[good_right_inds]))

    # Concatenate the arrays of indices
    left_lane_inds = np.concatenate(left_lane_inds)
    right_lane_inds = np.concatenate(right_lane_inds)

    # Extract left and right line pixel positions
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]

    # Lane prediction confidence.  The pixel counts (11000 / 3100) are
    # hardcoded values determined from the straight-line test images.
    left_conf = 1. - abs(11000. - len(left_lane_inds))/ 15000.
    right_conf = 1. - abs(3100. - len(right_lane_inds))/ 6200.

    if len(left_lane_inds) < 11000:
        left_conf = 1.

    # Very low confidence: clone the other lane shifted by the nominal
    # lane width of 840 px.
    if left_conf < -2 and right_conf > 0:
        leftx = rightx - 840
        lefty = righty

    if left_conf < 0:
        left_conf = 0

    right_conf = 1. - abs(3100. - len(right_lane_inds))/ 6200.  # (same value as computed above)
    if len(right_lane_inds) < 3100:
        right_conf = 1.

    if right_conf < -2 and left_conf > 0:
        rightx = leftx + 840
        righty = lefty

    if right_conf < 0:
        right_conf = 0

    # Check lane width: expect roughly 720-1000 px between the lane means;
    # otherwise keep the more confident lane and clone it across.
    if np.mean(rightx) - np.mean(leftx) > 1000 or np.mean(rightx) - np.mean(leftx) < 720:
        if left_conf >= right_conf:
            rightx = leftx + 840
            righty = lefty
        if right_conf > left_conf:
            leftx = rightx - 840
            lefty = righty

    # Check to make sure that pixels exist for each lane
    if rightx.size == 0 or righty.size == 0:
        rightx = leftx + 840
        righty = lefty
        detected = False
    else:
        detected = True

    if leftx.size == 0 or lefty.size == 0:
        leftx = rightx - 840
        lefty = righty
        detected = False
    else:
        detected = True

    # Fit a second order polynomial to each lane
    left_fit = np.polyfit(lefty, leftx, 2)
    right_fit = np.polyfit(righty, rightx, 2)

    # Store fits in the module-level history arrays.
    # TODO(review): the arrays are allocated with only 6 rows in the setup
    # cell, but `count` can exceed that for long videos — confirm sizing.
    store_left_fit[count,:] = left_fit
    store_right_fit[count,:] = right_fit
    diff_left = np.zeros((2))
    diff_right = np.zeros((2))

    # Clamp stored fit coefficients to max/min values determined from
    # plots of second-order curves.
    if store_left_fit[count,0] > 0.0025:
        store_left_fit[count,0] = 0.0025

    if store_left_fit[count,0] < -0.0025:
        store_left_fit[count,0] = -0.0025

    if store_left_fit[count,1] > 3.0:
        store_left_fit[count,1] = 3.0

    if store_left_fit[count,1] < -3.0:
        store_left_fit[count,1] = -3.0

    if store_right_fit[count,0] > 0.0025:
        store_right_fit[count,0] = 0.0025

    if store_right_fit[count,0] < -0.0025:
        store_right_fit[count,0] = -0.0025

    if store_right_fit[count,1] > 3.0:
        store_right_fit[count,1] = 3.0

    if store_right_fit[count,1] < -3.0:
        store_right_fit[count,1] = -3.0

    # Average the last 15 stored fits to smooth the final fit.
    if count > 15:
        left_fit[0] = np.mean(store_left_fit[count-15:count,0])
        left_fit[1] = np.mean(store_left_fit[count-15:count,1])
        left_fit[2] = np.mean(store_left_fit[count-15:count,2])
        right_fit[0] = np.mean(store_right_fit[count-15:count,0])
        right_fit[1] = np.mean(store_right_fit[count-15:count,1])
        right_fit[2] = np.mean(store_right_fit[count-15:count,2])

    # Generate x and y values for plotting the fitted curves.
    ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0] )
    left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
    right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]

    # Color the detected lane pixels (left = [255,0,0], right = [0,0,255]).
    out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
    out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
    return out_img, left_fit, right_fit, left_fitx, right_fitx, ploty, leftx, rightx, lefty, righty, detected, left_conf, right_conf
In [30]:
# Run the sliding-window search on the test frame and overlay the fitted
# second-order polynomials in yellow.
out_img, left_fit, right_fit, left_fitx, right_fitx, ploty, leftx, rightx, lefty, righty, detected, left_conf, right_conf = sliding_windows(histogram, binary_warped, count)
plt.figure(figsize=(10, 20))
# NOTE(review): out_img was assembled in memory (not loaded via cv2.imread);
# this BGR->RGB conversion swaps the red/blue lane-pixel colors — confirm
# the swap is intended.
plt.imshow(cv2.cvtColor(out_img.astype(np.uint8), cv2.COLOR_BGR2RGB))
plt.plot(left_fitx, ploty, color='yellow')
plt.plot(right_fitx, ploty, color='yellow')
plt.xlim(0, 1280)
plt.ylim(720, 0)
In [31]:
# Targeted search: assume a new warped binary image and previous-frame fits
def find_lines(left_fit, right_fit, binary_warped, count):
    """Search for lane pixels inside a fixed margin around the previous
    frame's polynomial fits (instead of re-running the full sliding-window
    search), re-fit both lanes, and render the search corridors.

    Parameters
    ----------
    left_fit, right_fit : sequence of 3 floats
        Previous-frame coefficients of x = A*y**2 + B*y + C (pixel space).
    binary_warped : 2-D array
        Thresholded, perspective-warped (bird's-eye) binary image.
    count : int
        Frame index into the module-level history arrays
        ``store_left_fit`` / ``store_right_fit`` (read and written here).

    Returns
    -------
    tuple
        (result image, left_fit, right_fit, left_fitx, right_fitx, ploty,
         leftx, rightx, lefty, righty, detected, left_conf, right_conf)
    """
    nonzero = binary_warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    margin = 100  # half-width (px) of the search corridor around each fit

    # Boolean masks over ALL nonzero pixels: inside each lane's corridor
    left_lane_inds = ((nonzerox > (left_fit[0]*(nonzeroy**2) + left_fit[1]*nonzeroy + left_fit[2] - margin)) & (nonzerox < (left_fit[0]*(nonzeroy**2) + left_fit[1]*nonzeroy + left_fit[2] + margin)))
    right_lane_inds = ((nonzerox > (right_fit[0]*(nonzeroy**2) + right_fit[1]*nonzeroy + right_fit[2] - margin)) & (nonzerox < (right_fit[0]*(nonzeroy**2) + right_fit[1]*nonzeroy + right_fit[2] + margin)))

    # Extract left and right line pixel positions
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]

    # Lane prediction confidence (hard-coded pixel counts tuned on the
    # straight-line test images).
    # FIXME(review): len(left_lane_inds) is the length of the boolean mask,
    # i.e. the TOTAL number of nonzero pixels, not this lane's pixel count —
    # left_lane_inds.sum() (or len(leftx)) was probably intended.  Left
    # unchanged because the 11000/3100 thresholds were tuned against the
    # current behaviour.
    left_conf = 1. - abs(11000. - len(left_lane_inds))/ 15000.
    right_conf = 1. - abs(3100. - len(right_lane_inds))/ 6200.

    if len(left_lane_inds) < 11000:
        left_conf = 1.

    # Very low confidence on one side: rebuild that lane by shifting the
    # other lane by 840 px (nominal lane width in warped pixel space)
    if left_conf < -2 and right_conf > 0:
        leftx = rightx - 840
        lefty = righty

    if left_conf < 0:
        left_conf = 0

    if len(right_lane_inds) < 3100:
        right_conf = 1.

    if right_conf < -2 and left_conf > 0:
        rightx = leftx + 840
        righty = lefty

    if right_conf < 0:
        right_conf = 0

    # Sanity-check the lane width; if implausible, rebuild the weaker lane
    # from the stronger one
    if np.mean(rightx) - np.mean(leftx) > 1000 or np.mean(rightx) - np.mean(leftx) < 720:
        if left_conf >= right_conf:
            rightx = leftx + 840
            righty = lefty
        if right_conf > left_conf:
            leftx = rightx - 840
            lefty = righty

    # Make sure pixels exist for each lane; fall back to a shifted copy of
    # the other lane when one side is empty.
    # BUG FIX: `detected` was previously reassigned independently by each
    # check, so an empty right lane was still reported as detected whenever
    # the left lane had pixels.
    detected = True
    if rightx.size == 0 or righty.size == 0:
        rightx = leftx + 840
        righty = lefty
        detected = False

    if leftx.size == 0 or lefty.size == 0:
        leftx = rightx - 840
        lefty = righty
        detected = False

    # Fit a second order polynomial to each lane
    left_fit = np.polyfit(lefty, leftx, 2)
    right_fit = np.polyfit(righty, rightx, 2)

    # Record the raw fits in the per-frame history (module-level arrays)
    store_left_fit[count,:] = left_fit
    store_right_fit[count,:] = right_fit

    # Clamp the quadratic and linear coefficients to empirically determined
    # plausible ranges (max/min values taken from plots of 2nd-order curves)
    store_left_fit[count,0] = np.clip(store_left_fit[count,0], -0.0025, 0.0025)
    store_left_fit[count,1] = np.clip(store_left_fit[count,1], -3.0, 3.0)
    store_right_fit[count,0] = np.clip(store_right_fit[count,0], -0.0025, 0.0025)
    store_right_fit[count,1] = np.clip(store_right_fit[count,1], -3.0, 3.0)

    # Smooth the returned fit by averaging the last 15 stored frames
    # (a commented-out relative-difference outlier filter was removed here)
    if count > 15:
        left_fit[0] = np.mean(store_left_fit[count-15:count,0])
        left_fit[1] = np.mean(store_left_fit[count-15:count,1])
        left_fit[2] = np.mean(store_left_fit[count-15:count,2])
        right_fit[0] = np.mean(store_right_fit[count-15:count,0])
        right_fit[1] = np.mean(store_right_fit[count-15:count,1])
        right_fit[2] = np.mean(store_right_fit[count-15:count,2])

    # Generate x and y values for plotting
    ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0])
    left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
    right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]

    # Create an image to draw on and an overlay for the search corridors
    out_img = np.dstack((binary_warped, binary_warped, binary_warped))*255
    window_img = np.zeros_like(out_img)

    # Color in left (red) and right (blue) lane pixels
    out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
    out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]

    # Build polygons covering the search corridors, recast into the point
    # format expected by cv2.fillPoly()
    left_line_window1 = np.array([np.transpose(np.vstack([left_fitx-margin, ploty]))])
    left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx+margin, ploty])))])
    left_line_pts = np.hstack((left_line_window1, left_line_window2))
    right_line_window1 = np.array([np.transpose(np.vstack([right_fitx-margin, ploty]))])
    right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx+margin, ploty])))])
    right_line_pts = np.hstack((right_line_window1, right_line_window2))

    # Draw the corridors onto the blank overlay and blend with the pixels
    cv2.fillPoly(window_img, np.int_([left_line_pts]), (0,255, 0))
    cv2.fillPoly(window_img, np.int_([right_line_pts]), (0,255, 0))
    result = cv2.addWeighted(out_img, 1, window_img, 0.3, 0)
    return result, left_fit, right_fit, left_fitx, right_fitx, ploty, leftx, rightx, lefty, righty, detected, left_conf, right_conf

# Run the targeted search seeded by the fits from the sliding-window step
# and display the search corridors with the fitted lane lines overlaid.
result, left_fit, right_fit, left_fitx, right_fitx, ploty, leftx, rightx, lefty, righty, detected, left_conf, right_conf = find_lines(left_fit, right_fit, binary_warped, count)
# result comes from OpenCV, so convert channel order for matplotlib
plt.imshow(cv2.cvtColor(result.astype(np.uint8), cv2.COLOR_BGR2RGB))
#plt.imshow(result)
plt.plot(left_fitx, ploty, color='yellow')
plt.plot(right_fitx, ploty, color='yellow')
plt.xlim(0, 1280)
plt.ylim(720, 0)  # flipped y-axis so the image origin stays at the top
Out[31]:
(720, 0)
In [32]:
# Radius of curvature (in meters) and vehicle offset from lane center
def rad_curve(result, left_fit, right_fit, ploty, lefty, righty, leftx, rightx, left_conf, right_conf):
    """Annotate `result` (in place) with the left/right radius of curvature
    in meters, the vehicle's distance from lane center in meters, and the
    per-lane confidence percentages.

    Parameters
    ----------
    result : 3-channel image text is drawn onto; also returned.
    left_fit, right_fit : pixel-space coefficients of x = A*y**2 + B*y + C,
        used to locate the lane lines at the bottom of the frame.
    ploty : y values the fits span; curvature is evaluated at max(ploty).
    lefty, righty, leftx, rightx : raw lane pixel coordinates, re-fit here
        in world (meter) space for the curvature calculation.
    left_conf, right_conf : confidence scores, displayed as percentages.
    """
    # Evaluate curvature at the bottom of the image (closest to the car)
    y_eval = np.max(ploty)

    # Define conversions in x and y from pixel space to meters
    ym_per_pix = 30/720   # meters per pixel in y dimension
    xm_per_pix = 3.7/840  # meters per pixel in x dimension

    # Fit new polynomials to x,y in world space
    left_fit_cr = np.polyfit(lefty*ym_per_pix, leftx*xm_per_pix, 2)
    right_fit_cr = np.polyfit(righty*ym_per_pix, rightx*xm_per_pix, 2)

    # Distance from center: compare the image midpoint (camera center)
    # with the lane midpoint at the bottom of the frame
    car_y = result.shape[0]
    left_line_location = left_fit[0]*car_y**2 + left_fit[1]*car_y + left_fit[2]
    right_line_location = right_fit[0]*car_y**2 + right_fit[1]*car_y + right_fit[2]
    car_center = xm_per_pix*(result.shape[1] / 2)
    lane_center = xm_per_pix*((left_line_location + right_line_location) / 2)
    dist_center = car_center - lane_center

    # Radii of curvature in meters.  (A pixel-space version used to be
    # computed first and immediately overwritten; that dead code is removed.)
    left_curverad = ((1 + (2*left_fit_cr[0]*y_eval*ym_per_pix + left_fit_cr[1])**2)**1.5) / np.absolute(2*left_fit_cr[0])
    right_curverad = ((1 + (2*right_fit_cr[0]*y_eval*ym_per_pix + right_fit_cr[1])**2)**1.5) / np.absolute(2*right_fit_cr[0])

    # Overlay the metrics on the image
    font = cv2.FONT_HERSHEY_DUPLEX
    cv2.putText(result,'Left Curvature: %.2f m'%(left_curverad),(60,60), font, 1,(255,255,255),2,cv2.LINE_AA)
    cv2.putText(result,'Right Curvature: %.2f m'%(right_curverad),(60,100), font, 1,(255,255,255),2,cv2.LINE_AA)
    cv2.putText(result,'Distance from Center : %.2f m'%(dist_center),(60,140), font, 1,(255,255,255),2,cv2.LINE_AA)
    cv2.putText(result,'Left Confidence : %.2f '%(left_conf*100.),(60,180), font, 1,(255,255,255),2,cv2.LINE_AA)
    cv2.putText(result,'Right Confidence : %.2f '%(right_conf*100.),(60,220), font, 1,(255,255,255),2,cv2.LINE_AA)

    return result
In [33]:
# -----------------------------------------------------------------------------------
# Draw the detected lane area and the lane/center lines back onto the road image
def draw_lines(binary_warped, left_fitx, right_fitx, ploty, Minv, undistorted):
    """Paint the lane polygon, both lane lines and the lane center line in
    warped (bird's-eye) space, un-warp with `Minv`, and blend the result
    onto the undistorted camera image.

    Parameters
    ----------
    binary_warped : 2-D array whose shape defines the warped canvas size.
    left_fitx, right_fitx : x coordinates of the fitted lane lines.
    ploty : matching y coordinates.
    Minv : inverse perspective transform matrix (warped -> camera view).
    undistorted : undistorted camera image the overlay is blended onto.

    Returns
    -------
    The blended image.
    """
    # Blank warped-space canvases: one for the lane area, one for the lines
    warp_zero = np.zeros_like(binary_warped).astype(np.uint8)
    color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
    color_warp2 = np.dstack((warp_zero, warp_zero, warp_zero))

    # Recast the x and y points into usable format for cv2.fillPoly()
    pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
    pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
    pts = np.hstack((pts_left, pts_right))

    # Narrow bands around each lane line and the lane center line
    margin = 10   # lane-line half-width (px)
    margin_c = 3  # center-line half-width (px)
    left_line_window1 = np.array([np.transpose(np.vstack([left_fitx-margin, ploty]))])
    left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx+margin, ploty])))])
    left_line_pts = np.hstack((left_line_window1, left_line_window2))
    right_line_window1 = np.array([np.transpose(np.vstack([right_fitx-margin, ploty]))])
    right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx+margin, ploty])))])
    right_line_pts = np.hstack((right_line_window1, right_line_window2))

    center_line_window1 = np.array([np.transpose(np.vstack([(left_fitx + right_fitx)/2-margin_c, ploty]))])
    center_line_window2 = np.array([np.flipud(np.transpose(np.vstack([(left_fitx + right_fitx)/2 + margin_c, ploty])))])
    center_line_pts = np.hstack((center_line_window1, center_line_window2))

    # Draw the lane area (green) and the lines, then merge the two layers
    cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))
    cv2.fillPoly(color_warp2, np.int_([left_line_pts]), (255, 255, 255))
    cv2.fillPoly(color_warp2, np.int_([right_line_pts]), (255, 255, 255))
    cv2.fillPoly(color_warp2, np.int_([center_line_pts]), (255, 255, 0))
    color_warp = cv2.addWeighted(color_warp, 1.0, color_warp2, 1.0, 0)

    # Warp the overlay back to the original image space using the inverse
    # perspective matrix (Minv).  (A redundant `img = undistorted` alias
    # was removed.)
    img_size = (undistorted.shape[1], undistorted.shape[0])
    newwarp = cv2.warpPerspective(color_warp, Minv, img_size)
    # Combine the result with the original image
    result = cv2.addWeighted(undistorted, 1, newwarp, 1.0, 0.0)

    return result
In [34]:
# Sanity check: display the undistorted frame used by the next cell
plt.imshow(img_undistort)
Out[34]:
<matplotlib.image.AxesImage at 0x12e9148d0>
In [35]:
# Full per-frame pipeline on the current test frame:
# 1) sliding-window lane search
out_img, left_fit, right_fit, left_fitx, right_fitx, ploty, leftx, rightx, lefty, righty, detected, left_conf, right_conf = sliding_windows(histogram, binary_warped, count)

# 2) draw the detected lane back onto the undistorted image
result = draw_lines(binary_warped, left_fitx, right_fitx, ploty, Minv, img_undistort)

# 3) annotate radius of curvature and lane-center offset
result = rad_curve(result, left_fit, right_fit, ploty, lefty, righty, leftx, rightx, left_conf, right_conf)

# Show the result
plt.figure(figsize=(10,5))
plt.imshow(result)
# plt.imshow(cv2.cvtColor(result, cv2.COLOR_BGR2RGB))
Out[35]:
<matplotlib.image.AxesImage at 0x12b1e7c18>
In [36]:
def process_image(img):
    """Run the full lane-finding pipeline on one BGR video frame and
    return the annotated frame in BGR.

    Steps: undistort + threshold + warp (pipeline), histogram-seeded
    sliding-window lane search, lane overlay, curvature/offset annotation.

    Parameters
    ----------
    img : BGR frame (converted to RGB before the pipeline).

    Returns
    -------
    Annotated frame converted back to BGR.
    """
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img_undistort, binary_warped, Minv = pipeline(img)

    # Histogram of the warped binary image seeds the sliding-window search
    histogram = hist_binary(binary_warped[:,:])

    # FIXME(review): `count` is reset to 0 on every frame, so the
    # count > 15 fit-averaging inside sliding_windows()/find_lines() never
    # engages; the commented-out `global` line in the original suggests a
    # stateful version was intended — confirm before relying on temporal
    # smoothing.
    count = 0

    # Detect lane pixels and fit polynomials.  (The original also called
    # find_lines() and then sliding_windows() a second time with identical
    # arguments; at count == 0 those calls only produced values that were
    # immediately overwritten, so the redundant calls and the dead local
    # store_left_fit/store_right_fit/detected assignments are removed.)
    out_img, left_fit, right_fit, left_fitx, right_fitx, ploty, leftx, rightx, lefty, righty, detected, left_conf, right_conf = sliding_windows(histogram, binary_warped, count)

    # Draw the detected lane back onto the undistorted image
    result = draw_lines(binary_warped, left_fitx, right_fitx, ploty, Minv, img_undistort)

    # Annotate radius of curvature, lane-center offset and confidences
    result = rad_curve(result, left_fit, right_fit, ploty, lefty, righty, leftx, rightx, left_conf, right_conf)

    bgr_img = cv2.cvtColor(result, cv2.COLOR_RGB2BGR)
    return bgr_img
In [37]:
# Run the full pipeline on every test image and show the results in a
# 3x2 grid.
images = glob.glob('./test_images/test*.jpg')

# Reset the module-level state consumed by sliding_windows()/find_lines()
detected = False
count = 0
store_left_fit = np.zeros((6,3))
store_right_fit = np.zeros((6,3))

plt.figure(figsize=(20,15))

# process_image() takes a BGR frame (as read by cv2.imread) and returns BGR
# (the unused `results` list and the manual `i` counter were removed)
for i, fname in enumerate(images):
    img = cv2.imread(fname)
    result = process_image(img)
    plt.subplot(3, 2, i+1)
    plt.imshow(cv2.cvtColor(result, cv2.COLOR_BGR2RGB))

# Number each of the six subplots
for j in range(6):
    plt.subplot(3, 2, j+1)
    plt.title(j+1)

plt.show()
In [38]:
# Import everything needed to edit/save/watch video clips
from moviepy.editor import VideoFileClip

def save_image(img):
    """Debug helper for clip.fl_image(): display `img`, write it to
    ./saved_images/challenge.jpg, and return it unchanged.

    NOTE(review): the commented-out BGR2RGB conversion suggests this was
    at some point fed BGR frames; with moviepy the saved image is written
    as-is — confirm the expected channel order before reusing.
    """
#     plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    plt.imshow(img)
    plt.imsave('./saved_images/challenge.jpg',img)
    return img
    
# Reset the module-level state used by the lane-search functions; the
# history arrays are sized for the 1261 frames of project_video.mp4.
detected = False
count = 0
store_left_fit = np.zeros((1261,3))
store_right_fit = np.zeros((1261,3))
white_output = 'P4_finalvideo_binary.mp4'
clip1 = VideoFileClip("project_video.mp4")#.subclip(43,45)
# Apply the lane-finding pipeline to every frame of the clip
white_clip = clip1.fl_image(process_image)
#white_clip = clip1.fl_image(save_image)
#plt.imshow(cv2.cvtColor(result, cv2.COLOR_BGR2RGB))
%time white_clip.write_videofile(white_output, audio=False)
[MoviePy] >>>> Building video P4_finalvideo_binary.mp4
[MoviePy] Writing video P4_finalvideo_binary.mp4
100%|█████████▉| 1260/1261 [05:24<00:00,  3.88it/s]
[MoviePy] Done.
[MoviePy] >>>> Video ready: P4_finalvideo_binary.mp4 

CPU times: user 20min 23s, sys: 1min 31s, total: 21min 55s
Wall time: 5min 24s
In [ ]: